[IA64] vti save-restore: clean up of PV region register handling.
authorAlex Williamson <alex.williamson@hp.com>
Wed, 7 Nov 2007 17:07:06 +0000 (10:07 -0700)
committerAlex Williamson <alex.williamson@hp.com>
Wed, 7 Nov 2007 17:07:06 +0000 (10:07 -0700)
Fix rr handling to avoid reserved register/field fault in xen

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
xen/arch/ia64/vmx/vmx_utility.c
xen/arch/ia64/vmx/vmx_vcpu.c
xen/arch/ia64/xen/domain.c
xen/arch/ia64/xen/regionreg.c
xen/arch/ia64/xen/vcpu.c
xen/include/asm-ia64/regionreg.h

index 1cf8619fbecbb4927c7980b8e19358ed7e92a1fc..7bc734f48e82c935c2e6bde848b42f70b5add353 100644 (file)
@@ -637,10 +637,9 @@ int is_reserved_itir_field(VCPU* vcpu, u64 itir)
        return 0;
 }
 
-int is_reserved_rr_field(VCPU* vcpu, u64 reg_value)
+static int __is_reserved_rr_field(u64 reg_value)
 {
-    ia64_rr rr;
-    rr.rrval = reg_value;
+    ia64_rr rr = { .rrval = reg_value };
 
     if(rr.reserved0 != 0 || rr.reserved1 != 0){
         return 1;
@@ -656,3 +655,20 @@ int is_reserved_rr_field(VCPU* vcpu, u64 reg_value)
     return 0;
 }
 
+int is_reserved_rr_rid(VCPU* vcpu, u64 reg_value)
+{
+    ia64_rr rr = { .rrval = reg_value };
+
+    if (rr.rid >= (1UL << vcpu->domain->arch.rid_bits))
+        return 1;
+
+    return 0;
+}
+
+int is_reserved_rr_field(VCPU* vcpu, u64 reg_value)
+{
+    if (__is_reserved_rr_field(reg_value))
+        return 1;
+
+    return is_reserved_rr_rid(vcpu, reg_value);
+}
index 63d2e945fa0800ee378d23057c0c79f1284511f8..0fd57b060deabb9a32c2116b85f5ced00d82e69a 100644 (file)
@@ -161,12 +161,12 @@ IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
 
 IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
 {
-    ia64_rr newrr;
     u64 rrval;
 
-    newrr.rrval=val;
-    if (newrr.rid >= (1 << vcpu->domain->arch.rid_bits))
-        panic_domain (NULL, "use of invalid rid %x\n", newrr.rid);
+    if (unlikely(is_reserved_rr_rid(vcpu, val))) {
+        gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
+        return IA64_RSVDREG_FAULT;
+    }
 
     VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val;
     switch((u64)(reg>>VRN_SHIFT)) {
index 5d41234cc80a17cd67a8295d3b4c5c5f010356a8..08b642694ed4abb271669410f4ae5282dd5d42c6 100644 (file)
@@ -1627,6 +1627,7 @@ domain_set_shared_info_va (unsigned long va)
 {
        struct vcpu *v = current;
        struct domain *d = v->domain;
+       int rc;
 
        /* Check virtual address:
           must belong to region 7,
@@ -1648,9 +1649,10 @@ domain_set_shared_info_va (unsigned long va)
        __ia64_per_cpu_var(current_psr_ic_addr) = (int *)(va + XSI_PSR_IC_OFS);
 
        /* Remap the shared pages.  */
-       set_one_rr (7UL << 61, PSCB(v,rrs[7]));
+       rc = !set_one_rr(7UL << 61, PSCB(v,rrs[7]));
+       BUG_ON(rc);
 
-       return 0;
+       return rc;
 }
 
 /* Transfer and clear the shadow bitmap in 1kB chunks for L1 cache. */
index 2588a55d7809508d2cf42c3fc0686a2c51ca26c1..aa3d9b94e83b1b31c9a3dc8509f2ef98e70c5e61 100644 (file)
@@ -238,14 +238,12 @@ int set_one_rr(unsigned long rr, unsigned long val)
        ia64_rr rrv, newrrv, memrrv;
        unsigned long newrid;
 
-       if (val == -1)
-               return 1;
-
        rrv.rrval = val;
        newrrv.rrval = 0;
        newrid = v->arch.starting_rid + rrv.rid;
 
-       if (newrid > v->arch.ending_rid) {
+       // avoid reserved register/field fault
+       if (unlikely(is_reserved_rr_field(v, val))) {
                printk("can't set rr%d to %lx, starting_rid=%x,"
                        "ending_rid=%x, val=%lx\n", (int) rreg, newrid,
                        v->arch.starting_rid,v->arch.ending_rid,val);
@@ -295,12 +293,11 @@ void init_all_rr(struct vcpu *v)
        ia64_rr rrv;
 
        rrv.rrval = 0;
-       //rrv.rrval = v->domain->arch.metaphysical_rr0;
        rrv.ps = v->arch.vhpt_pg_shift;
        rrv.ve = 1;
        if (!v->vcpu_info)
                panic("Stopping in init_all_rr\n");
-       VCPU(v,rrs[0]) = -1;
+       VCPU(v,rrs[0]) = rrv.rrval;
        VCPU(v,rrs[1]) = rrv.rrval;
        VCPU(v,rrs[2]) = rrv.rrval;
        VCPU(v,rrs[3]) = rrv.rrval;
@@ -308,7 +305,7 @@ void init_all_rr(struct vcpu *v)
        VCPU(v,rrs[5]) = rrv.rrval;
        rrv.ve = 0; 
        VCPU(v,rrs[6]) = rrv.rrval;
-//     v->shared_info->arch.rrs[7] = rrv.rrval;
+       VCPU(v,rrs[7]) = rrv.rrval;
 }
 
 
index 74aac37b6bcb3b315b04576b4510fb90cec37ee7..6dfcb0f5ee7bb14c88f132a376408f7616608057 100644 (file)
@@ -287,7 +287,7 @@ static void vcpu_set_metaphysical_mode(VCPU * vcpu, BOOLEAN newmode)
                PSCB(vcpu, metaphysical_mode) = newmode;
                if (newmode)
                        set_metaphysical_rr0();
-               else if (PSCB(vcpu, rrs[0]) != -1)
+               else
                        set_virtual_rr0();
        }
 }
@@ -2095,9 +2095,16 @@ unsigned long vcpu_get_rr_ve(VCPU * vcpu, u64 vadr)
 
 IA64FAULT vcpu_set_rr(VCPU * vcpu, u64 reg, u64 val)
 {
+       if (unlikely(is_reserved_rr_field(vcpu, val))) {
+               gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
+               return IA64_RSVDREG_FAULT;
+       }
+
        PSCB(vcpu, rrs)[reg >> 61] = val;
-       if (vcpu == current)
-               set_one_rr(reg, val);
+       if (likely(vcpu == current)) {
+               int rc = set_one_rr(reg, val);
+               BUG_ON(rc == 0);
+       }
        return IA64_NO_FAULT;
 }
 
@@ -2120,17 +2127,30 @@ IA64FAULT vcpu_set_rr0_to_rr4(VCPU * vcpu, u64 val0, u64 val1, u64 val2,
        u64 reg3 = 0x6000000000000000UL;
        u64 reg4 = 0x8000000000000000UL;
 
+       if (unlikely(is_reserved_rr_field(vcpu, val0) ||
+                    is_reserved_rr_field(vcpu, val1) ||
+                    is_reserved_rr_field(vcpu, val2) ||
+                    is_reserved_rr_field(vcpu, val3) ||
+                    is_reserved_rr_field(vcpu, val4))) {
+               gdprintk(XENLOG_DEBUG,
+                        "use of invalid rrval %lx %lx %lx %lx %lx\n",
+                        val0, val1, val2, val3, val4);
+               return IA64_RSVDREG_FAULT;
+       }
+
        PSCB(vcpu, rrs)[reg0 >> 61] = val0;
        PSCB(vcpu, rrs)[reg1 >> 61] = val1;
        PSCB(vcpu, rrs)[reg2 >> 61] = val2;
        PSCB(vcpu, rrs)[reg3 >> 61] = val3;
        PSCB(vcpu, rrs)[reg4 >> 61] = val4;
-       if (vcpu == current) {
-               set_one_rr(reg0, val0);
-               set_one_rr(reg1, val1);
-               set_one_rr(reg2, val2);
-               set_one_rr(reg3, val3);
-               set_one_rr(reg4, val4);
+       if (likely(vcpu == current)) {
+               int rc;
+               rc  = !set_one_rr(reg0, val0);
+               rc |= !set_one_rr(reg1, val1);
+               rc |= !set_one_rr(reg2, val2);
+               rc |= !set_one_rr(reg3, val3);
+               rc |= !set_one_rr(reg4, val4);
+               BUG_ON(rc != 0);
        }
        return IA64_NO_FAULT;
 }
index 9651254a8d35109355ea390661726f8ff2abc9a0..0eea1b1342e2bea49a1492bf6bc7df5961ff1b2e 100644 (file)
@@ -1,3 +1,4 @@
+
 #ifndef _REGIONREG_H_
 #define _REGIONREG_H_
 
@@ -85,6 +86,9 @@ extern void set_metaphysical_rr0(void);
 
 extern void load_region_regs(struct vcpu *v);
 
+extern int is_reserved_rr_rid(struct vcpu *vcpu, u64 reg_value);
+extern int is_reserved_rr_field(struct vcpu *vcpu, u64 reg_value);
+
 #endif /* !_REGIONREG_H_ */
 
 /*